*/
if ( d->arch.paging.hap.p2m_pages == 0 )
{
- pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
- d->arch.paging.hap.p2m_pages += 1;
+ pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
+ d->arch.paging.hap.p2m_pages += 1;
}
else
#endif
{
- pg = mfn_to_page(hap_alloc(d));
-
- d->arch.paging.hap.p2m_pages += 1;
- d->arch.paging.hap.total_pages -= 1;
- }
-
- if ( pg == NULL ) {
- hap_unlock(d);
- return NULL;
+ pg = mfn_to_page(hap_alloc(d));
+ d->arch.paging.hap.p2m_pages += 1;
+ d->arch.paging.hap.total_pages -= 1;
+ }
+
+ if ( pg == NULL )
+ {
+ hap_unlock(d);
+ return NULL;
}
hap_unlock(d);
{
ASSERT(page_get_owner(pg) == d);
/* Should have just the one ref we gave it in alloc_p2m_page() */
- if ( (pg->count_info & PGC_count_mask) != 1 ) {
+ if ( (pg->count_info & PGC_count_mask) != 1 )
HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
pg->count_info, pg->u.inuse.type_info);
- }
pg->count_info = 0;
/* Free should not decrement domain's total allocation, since
* these pages were allocated without an owner. */
page_set_owner(pg, NULL);
free_domheap_pages(pg, 0);
d->arch.paging.hap.p2m_pages--;
- ASSERT( d->arch.paging.hap.p2m_pages >= 0 );
+ ASSERT(d->arch.paging.hap.p2m_pages >= 0);
}
/* Return the size of the pool, rounded up to the nearest MB */
{
unsigned int pg = d->arch.paging.hap.total_pages;
- HERE_I_AM;
return ((pg >> (20 - PAGE_SHIFT))
+ ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
ASSERT(hap_locked_by_me(d));
- while ( d->arch.paging.hap.total_pages != pages ) {
- if ( d->arch.paging.hap.total_pages < pages ) {
+ while ( d->arch.paging.hap.total_pages != pages )
+ {
+ if ( d->arch.paging.hap.total_pages < pages )
+ {
/* Need to allocate more memory from domheap */
sp = alloc_domheap_pages(NULL, 0, 0);
- if ( sp == NULL ) {
+ if ( sp == NULL )
+ {
HAP_PRINTK("failed to allocate hap pages.\n");
return -ENOMEM;
}
d->arch.paging.hap.total_pages += 1;
list_add_tail(&sp->list, &d->arch.paging.hap.freelists);
}
- else if ( d->arch.paging.hap.total_pages > pages ) {
+ else if ( d->arch.paging.hap.total_pages > pages )
+ {
/* Need to return memory to domheap */
ASSERT(!list_empty(&d->arch.paging.hap.freelists));
sp = list_entry(d->arch.paging.hap.freelists.next,
}
/* Check to see if we need to yield and try again */
- if ( preempted && hypercall_preempt_check() ) {
+ if ( preempted && hypercall_preempt_check() )
+ {
*preempted = 1;
return 0;
}
for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
sl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
l2e_from_pfn(
- mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
- __PAGE_HYPERVISOR);
+ mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
+ __PAGE_HYPERVISOR);
for ( i = 0; i < HAP_L3_PAGETABLE_ENTRIES; i++ )
sl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
unsigned int old_pages;
int rv = 0;
- HERE_I_AM;
-
domain_pause(d);
/* error check */
- if ( (d == current->domain) ) {
+ if ( (d == current->domain) )
+ {
rv = -EINVAL;
goto out;
}
old_pages = d->arch.paging.hap.total_pages;
- if ( old_pages == 0 ) {
+ if ( old_pages == 0 )
+ {
unsigned int r;
hap_lock(d);
r = hap_set_allocation(d, 256, NULL);
hap_unlock(d);
- if ( r != 0 ) {
+ if ( r != 0 )
+ {
hap_set_allocation(d, 0, NULL);
rv = -ENOMEM;
goto out;
}
/* allocate P2m table */
- if ( mode & PG_translate ) {
+ if ( mode & PG_translate )
+ {
rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
if ( rv != 0 )
goto out;
void hap_final_teardown(struct domain *d)
{
- HERE_I_AM;
-
if ( d->arch.paging.hap.total_pages != 0 )
hap_teardown(d);
p2m_teardown(d);
- ASSERT( d->arch.paging.hap.p2m_pages == 0 );
+ ASSERT(d->arch.paging.hap.p2m_pages == 0);
}
void hap_teardown(struct domain *d)
{
struct vcpu *v;
mfn_t mfn;
- HERE_I_AM;
ASSERT(d->is_dying);
ASSERT(d != current->domain);
if ( !hap_locked_by_me(d) )
hap_lock(d); /* Keep various asserts happy */
- if ( paging_mode_enabled(d) ) {
+ if ( paging_mode_enabled(d) )
+ {
/* release the monitor table held by each vcpu */
- for_each_vcpu(d, v) {
- if ( v->arch.paging.mode && paging_mode_external(d) ) {
+ for_each_vcpu ( d, v )
+ {
+ if ( v->arch.paging.mode && paging_mode_external(d) )
+ {
mfn = pagetable_get_mfn(v->arch.monitor_table);
if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
hap_destroy_monitor_table(v, mfn);
}
}
- if ( d->arch.paging.hap.total_pages != 0 ) {
+ if ( d->arch.paging.hap.total_pages != 0 )
+ {
HAP_PRINTK("teardown of domain %u starts."
" pages total = %u, free = %u, p2m=%u\n",
d->domain_id,
{
int rc, preempted = 0;
- HERE_I_AM;
-
- switch ( sc->op ) {
+ switch ( sc->op )
+ {
case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
hap_lock(d);
rc = hap_set_allocation(d, sc->mb << (20 - PAGE_SHIFT), &preempted);
{
struct domain *d;
- HERE_I_AM;
-
d = v->domain;
hap_lock(d);
* guest's paging mode. So, make sure the shadow registers (CR0, CR4, EFER)
* reflect guest's status correctly.
*/
- if ( hvm_paging_enabled(v) ) {
+ if ( hvm_paging_enabled(v) )
+ {
if ( hvm_long_mode_enabled(v) )
v->arch.paging.mode = &hap_paging_long_mode;
else if ( hvm_pae_enabled(v) )
else
v->arch.paging.mode = &hap_paging_protected_mode;
}
- else {
+ else
+ {
v->arch.paging.mode = &hap_paging_real_mode;
}
v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
- if ( pagetable_is_null(v->arch.monitor_table) ) {
+ if ( pagetable_is_null(v->arch.monitor_table) )
+ {
mfn_t mmfn = hap_make_monitor_table(v);
v->arch.monitor_table = pagetable_from_mfn(mmfn);
make_cr3(v, mfn_x(mmfn));
index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
ASSERT(index < MACHPHYS_MBYTES>>1);
- for_each_vcpu(d, v) {
- if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
- continue;
+ for_each_vcpu ( d, v )
+ {
+ if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
+ continue;
- ASSERT(paging_mode_external(v->domain));
+ ASSERT(paging_mode_external(v->domain));
if ( v == current ) /* OK to use linear map of monitor_table */
- ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
+ ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
else {
- l3_pgentry_t *ml3e;
- ml3e = hap_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
- ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
+ l3_pgentry_t *ml3e;
+ ml3e = hap_map_domain_page(
+ pagetable_get_mfn(v->arch.monitor_table));
+ ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
ml2e = hap_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
ml2e += l2_table_offset(RO_MPT_VIRT_START);
- hap_unmap_domain_page(ml3e);
+ hap_unmap_domain_page(ml3e);
}
- ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
+ ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
if ( v != current )
hap_unmap_domain_page(ml2e);
}
#if CONFIG_PAGING_LEVELS == 3
/* install P2M in monitor table for PAE Xen */
if ( level == 3 )
- /* We have written to the p2m l3: need to sync the per-vcpu
+ /* We have written to the p2m l3: need to sync the per-vcpu
* copies of it in the monitor tables */
- p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
+ p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
#endif
hap_unlock(v->domain);
*/
unsigned long hap_gva_to_gfn_real_mode(struct vcpu *v, unsigned long gva)
{
- HERE_I_AM;
return ((paddr_t)gva >> PAGE_SHIFT);
}
l2_pgentry_32_t *l2e; /* guest page entry size is 32-bit */
l1_pgentry_32_t *l1e;
- HERE_I_AM;
-
gpfn = (gcr3 >> PAGE_SHIFT);
- for ( lev = mode; lev >= 1; lev-- ) {
- mfn = get_mfn_from_gpfn( gpfn );
- if ( mfn == INVALID_MFN ) {
+ for ( lev = mode; lev >= 1; lev-- )
+ {
+ mfn = get_mfn_from_gpfn(gpfn);
+ if ( mfn == INVALID_MFN )
+ {
HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva,
lev);
success = 0;
}
index = (gva >> PT_SHIFT[mode][lev]) & (PT_ENTRIES[mode][lev]-1);
- if ( lev == 2 ) {
- l2e = map_domain_page( mfn );
+ if ( lev == 2 )
+ {
+ l2e = map_domain_page(mfn);
HAP_PRINTK("l2 page table entry is %ulx at index = %d\n",
l2e[index].l2, index);
- if ( !(l2e_get_flags_32(l2e[index]) & _PAGE_PRESENT) ) {
+ if ( !(l2e_get_flags_32(l2e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 2 entry not present at index = %d\n", index);
success = 0;
}
- if ( l2e_get_flags_32(l2e[index]) & _PAGE_PSE ) { /* handle PSE */
+ if ( l2e_get_flags_32(l2e[index]) & _PAGE_PSE )
+ {
HAP_PRINTK("guest page table is PSE\n");
- if ( l2e_get_intpte(l2e[index]) & 0x001FE000UL ) { /*[13:20] */
+ if ( l2e_get_intpte(l2e[index]) & 0x001FE000UL ) /* PSE-36 bits [20:13]: non-zero => frame above 4GB */
+ {
printk("guest physical memory size is too large!\n");
domain_crash(v->domain);
}
unmap_domain_page(l2e);
break; /* last level page table, return from here */
}
- else {
- gpfn = l2e_get_pfn( l2e[index] );
- }
+
+ gpfn = l2e_get_pfn(l2e[index]);
unmap_domain_page(l2e);
}
- if ( lev == 1 ) {
- l1e = map_domain_page( mfn );
+ if ( lev == 1 )
+ {
+ l1e = map_domain_page(mfn);
HAP_PRINTK("l1 page table entry is %ulx at index = %d\n",
l1e[index].l1, index);
- if ( !(l1e_get_flags_32(l1e[index]) & _PAGE_PRESENT) ) {
+ if ( !(l1e_get_flags_32(l1e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 1 entry not present at index = %d\n", index);
success = 0;
}
- gpfn = l1e_get_pfn( l1e[index] );
+ gpfn = l1e_get_pfn(l1e[index]);
gpa = (l1e_get_intpte(l1e[index]) & PHYSICAL_PAGE_4K_MASK) +
- (gva & ~PHYSICAL_PAGE_4K_MASK);
+ (gva & ~PHYSICAL_PAGE_4K_MASK);
unmap_domain_page(l1e);
}
HAP_PRINTK("success = %d, gva = %lx, gpa = %lx\n", success, gva, gpa);
- if ( !success ) /* error happened */
- return INVALID_GFN;
- else
- return ((paddr_t)gpa >> PAGE_SHIFT);
+ return (!success ? INVALID_GFN : ((paddr_t)gpa >> PAGE_SHIFT));
}
l2_pgentry_t *l2e;
l3_pgentry_t *l3e;
- HERE_I_AM;
-
gpfn = (gcr3 >> PAGE_SHIFT);
- for ( lev = mode; lev >= 1; lev-- ) {
- mfn = get_mfn_from_gpfn( gpfn );
- if ( mfn == INVALID_MFN ) {
+ for ( lev = mode; lev >= 1; lev-- )
+ {
+ mfn = get_mfn_from_gpfn(gpfn);
+ if ( mfn == INVALID_MFN )
+ {
HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva,
lev);
success = 0;
}
index = (gva >> PT_SHIFT[mode][lev]) & (PT_ENTRIES[mode][lev]-1);
- if ( lev == 3 ) {
- l3e = map_domain_page( mfn );
- index += ( ((gcr3 >> 5 ) & 127 ) * 4 );
- if ( !(l3e_get_flags(l3e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 3 )
+ {
+ l3e = map_domain_page(mfn);
+ index += ((gcr3 >> 5) & 127) * 4;
+ if ( !(l3e_get_flags(l3e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 3 entry not present at index = %d\n", index);
success = 0;
}
- gpfn = l3e_get_pfn( l3e[index] );
+ gpfn = l3e_get_pfn(l3e[index]);
unmap_domain_page(l3e);
}
- if ( lev == 2 ) {
- l2e = map_domain_page( mfn );
- if ( !(l2e_get_flags(l2e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 2 )
+ {
+ l2e = map_domain_page(mfn);
+ if ( !(l2e_get_flags(l2e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 2 entry not present at index = %d\n", index);
success = 0;
}
- if ( l2e_get_flags(l2e[index]) & _PAGE_PSE ) { /* handle PSE */
+ if ( l2e_get_flags(l2e[index]) & _PAGE_PSE )
+ {
HAP_PRINTK("guest page table is PSE\n");
gpa = (l2e_get_intpte(l2e[index]) & PHYSICAL_PAGE_2M_MASK) +
(gva & ~PHYSICAL_PAGE_2M_MASK);
unmap_domain_page(l2e);
break; /* last level page table, jump out from here */
}
- else {
- gpfn = l2e_get_pfn(l2e[index]);
- }
+
+ gpfn = l2e_get_pfn(l2e[index]);
unmap_domain_page(l2e);
}
- if ( lev == 1 ) {
- l1e = map_domain_page( mfn );
- if ( !(l1e_get_flags(l1e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 1 )
+ {
+ l1e = map_domain_page(mfn);
+ if ( !(l1e_get_flags(l1e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 1 entry not present at index = %d\n", index);
success = 0;
}
- gpfn = l1e_get_pfn( l1e[index] );
+ gpfn = l1e_get_pfn(l1e[index]);
gpa = (l1e_get_intpte(l1e[index]) & PHYSICAL_PAGE_4K_MASK) +
(gva & ~PHYSICAL_PAGE_4K_MASK);
unmap_domain_page(l1e);
gpa &= ~PAGE_NX_BIT; /* clear NX bit of guest physical address */
HAP_PRINTK("success = %d, gva = %lx, gpa = %lx\n", success, gva, gpa);
- if ( !success )
- return INVALID_GFN;
- else
- return ((paddr_t)gpa >> PAGE_SHIFT);
+ return (!success ? INVALID_GFN : ((paddr_t)gpa >> PAGE_SHIFT));
#else
- HERE_I_AM;
printk("guest paging level (3) is greater than host paging level!\n");
domain_crash(v->domain);
return INVALID_GFN;
}
-
/* Translate guest virtual address to guest physical address. Specifically
* for long mode guest.
*/
l2_pgentry_t *l2e;
l1_pgentry_t *l1e;
- HERE_I_AM;
-
gpfn = (gcr3 >> PAGE_SHIFT);
- for ( lev = mode; lev >= 1; lev-- ) {
- mfn = get_mfn_from_gpfn( gpfn );
- if ( mfn == INVALID_MFN ) {
+ for ( lev = mode; lev >= 1; lev-- )
+ {
+ mfn = get_mfn_from_gpfn(gpfn);
+ if ( mfn == INVALID_MFN )
+ {
HAP_PRINTK("bad pfn=0x%lx from gva=0x%lx at lev%d\n", gpfn, gva,
lev);
success = 0;
}
index = (gva >> PT_SHIFT[mode][lev]) & (PT_ENTRIES[mode][lev]-1);
- if ( lev == 4 ) {
- l4e = map_domain_page( mfn );
- if ( !(l4e_get_flags(l4e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 4 )
+ {
+ l4e = map_domain_page(mfn);
+ if ( !(l4e_get_flags(l4e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 4 entry not present at index = %d\n", index);
success = 0;
}
- gpfn = l4e_get_pfn( l4e[index] );
+ gpfn = l4e_get_pfn(l4e[index]);
unmap_domain_page(l4e);
}
- if ( lev == 3 ) {
- l3e = map_domain_page( mfn );
- if ( !(l3e_get_flags(l3e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 3 )
+ {
+ l3e = map_domain_page(mfn);
+ if ( !(l3e_get_flags(l3e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 3 entry not present at index = %d\n", index);
success = 0;
}
- gpfn = l3e_get_pfn( l3e[index] );
+ gpfn = l3e_get_pfn(l3e[index]);
unmap_domain_page(l3e);
}
- if ( lev == 2 ) {
- l2e = map_domain_page( mfn );
- if ( !(l2e_get_flags(l2e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 2 )
+ {
+ l2e = map_domain_page(mfn);
+ if ( !(l2e_get_flags(l2e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 2 entry not present at index = %d\n", index);
success = 0;
}
- if ( l2e_get_flags(l2e[index]) & _PAGE_PSE ) { /* handle PSE */
+ if ( l2e_get_flags(l2e[index]) & _PAGE_PSE )
+ {
HAP_PRINTK("guest page table is PSE\n");
gpa = (l2e_get_intpte(l2e[index]) & PHYSICAL_ADDR_2M_MASK_LM)
+ (gva & ~PHYSICAL_PAGE_2M_MASK);
unmap_domain_page(l2e);
break; /* last level page table, jump out from here */
}
- else {
- gpfn = l2e_get_pfn(l2e[index]);
- }
+
+ gpfn = l2e_get_pfn(l2e[index]);
unmap_domain_page(l2e);
}
- if ( lev == 1 ) {
- l1e = map_domain_page( mfn );
- if ( !(l1e_get_flags(l1e[index]) & _PAGE_PRESENT) ) {
+ if ( lev == 1 )
+ {
+ l1e = map_domain_page(mfn);
+ if ( !(l1e_get_flags(l1e[index]) & _PAGE_PRESENT) )
+ {
HAP_PRINTK("Level 1 entry not present at index = %d\n", index);
success = 0;
}
- gpfn = l1e_get_pfn( l1e[index] );
+ gpfn = l1e_get_pfn(l1e[index]);
gpa = (l1e_get_intpte(l1e[index]) & PHYSICAL_ADDR_4K_MASK_LM) +
(gva & ~PHYSICAL_PAGE_4K_MASK);
unmap_domain_page(l1e);
gpa &= ~PAGE_NX_BIT; /* clear NX bit of guest physical address */
HAP_PRINTK("success = %d, gva = %lx, gpa = %lx\n", success, gva, gpa);
- if ( !success )
- return INVALID_GFN;
- else
- return ((paddr_t)gpa >> PAGE_SHIFT);
+ return (!success ? INVALID_GFN : ((paddr_t)gpa >> PAGE_SHIFT));
#else
- HERE_I_AM;
printk("guest paging level (4) is greater than host paging level!\n");
domain_crash(v->domain);
return INVALID_GFN;